From 2f7228ccdc51fc64637d582c2ae14ff8dfd6c940 Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Wed, 11 Jul 2007 17:23:09 +0100 Subject: [PATCH] Add HVM hardware feature suspend/resume. Signed-off-by: Ke Yu Signed-off-by: Kevin Tian Signed-off-by: Keir Fraser --- xen/arch/x86/acpi/power.c | 31 +++++++--- xen/arch/x86/hvm/hvm.c | 3 +- xen/arch/x86/hvm/svm/svm.c | 5 +- xen/arch/x86/hvm/vmx/vmcs.c | 93 ++++++++++++++++++++++++------ xen/arch/x86/hvm/vmx/vmx.c | 14 +---- xen/include/asm-x86/hvm/hvm.h | 20 +++++-- xen/include/asm-x86/hvm/vmx/vmcs.h | 3 + 7 files changed, 123 insertions(+), 46 deletions(-) diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c index e27850ad46..1bd8e2dfd4 100644 --- a/xen/arch/x86/acpi/power.c +++ b/xen/arch/x86/acpi/power.c @@ -82,10 +82,27 @@ static void device_power_up(void) console_resume(); } +static void freeze_domains(void) +{ + struct domain *d; + + for_each_domain(d) + if (d->domain_id != 0) + domain_pause(d); +} + +static void thaw_domains(void) +{ + struct domain *d; + + for_each_domain(d) + if (d->domain_id != 0) + domain_unpause(d); +} + /* Main interface to do xen specific suspend/resume */ int enter_state(u32 state) { - struct domain *d; unsigned long flags; int error; @@ -99,9 +116,9 @@ int enter_state(u32 state) if (!spin_trylock(&pm_lock)) return -EBUSY; - for_each_domain(d) - if (d->domain_id != 0) - domain_pause(d); + freeze_domains(); + + hvm_suspend_cpu(); pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n", acpi_states[state]); @@ -135,13 +152,11 @@ int enter_state(u32 state) Done: local_irq_restore(flags); - for_each_domain(d) - if (d->domain_id!=0) - domain_unpause(d); + hvm_resume_cpu(); + thaw_domains(); spin_unlock(&pm_lock); return error; - } /* diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 71ca313f8a..e5c5a79fdd 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -78,8 +78,7 @@ void hvm_enable(struct hvm_function_table *fns) void 
hvm_disable(void) { - if ( hvm_enabled ) - hvm_funcs.disable(); + hvm_suspend_cpu(); } void hvm_stts(struct vcpu *v) diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index fbae194133..1596a55578 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -94,9 +94,8 @@ static void svm_inject_exception(struct vcpu *v, int trap, vmcb->eventinj = event; } -static void stop_svm(void) +static void svm_suspend_cpu(void) { - /* We turn off the EFER_SVME bit. */ write_efer(read_efer() & ~EFER_SVME); } @@ -974,7 +973,7 @@ static int svm_event_injection_faulted(struct vcpu *v) static struct hvm_function_table svm_function_table = { .name = "SVM", - .disable = stop_svm, + .suspend_cpu = svm_suspend_cpu, .domain_initialise = svm_domain_initialise, .domain_destroy = svm_domain_destroy, .vcpu_initialise = svm_vcpu_initialise, diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index 9d446afb17..72d56ccf4a 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -45,7 +45,9 @@ u32 vmx_vmexit_control __read_mostly; u32 vmx_vmentry_control __read_mostly; bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly; +static DEFINE_PER_CPU(struct vmcs_struct *, host_vmcs); static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs); +static DEFINE_PER_CPU(struct list_head, active_vmcs_list); static u32 vmcs_revision_id __read_mostly; @@ -185,34 +187,81 @@ static void vmx_free_vmcs(struct vmcs_struct *vmcs) static void __vmx_clear_vmcs(void *info) { struct vcpu *v = info; + struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx; + + /* Otherwise we can nest (vmx_suspend_cpu() vs. vmx_clear_vmcs()). 
*/ + ASSERT(!local_irq_is_enabled()); + + if ( arch_vmx->active_cpu == smp_processor_id() ) + { + __vmpclear(virt_to_maddr(arch_vmx->vmcs)); - __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs)); + arch_vmx->active_cpu = -1; + arch_vmx->launched = 0; - v->arch.hvm_vmx.active_cpu = -1; - v->arch.hvm_vmx.launched = 0; + list_del(&arch_vmx->active_list); - if ( v->arch.hvm_vmx.vmcs == this_cpu(current_vmcs) ) - this_cpu(current_vmcs) = NULL; + if ( arch_vmx->vmcs == this_cpu(current_vmcs) ) + this_cpu(current_vmcs) = NULL; + } } static void vmx_clear_vmcs(struct vcpu *v) { int cpu = v->arch.hvm_vmx.active_cpu; - if ( cpu == -1 ) - return; - - if ( cpu == smp_processor_id() ) - return __vmx_clear_vmcs(v); - - on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1); + if ( cpu != -1 ) + on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1); } static void vmx_load_vmcs(struct vcpu *v) { + unsigned long flags; + + local_irq_save(flags); + + if ( v->arch.hvm_vmx.active_cpu == -1 ) + { + list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list)); + v->arch.hvm_vmx.active_cpu = smp_processor_id(); + } + + ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id()); + __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs)); - v->arch.hvm_vmx.active_cpu = smp_processor_id(); this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs; + + local_irq_restore(flags); +} + +void vmx_suspend_cpu(void) +{ + struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list); + unsigned long flags; + + local_irq_save(flags); + + while ( !list_empty(active_vmcs_list) ) + __vmx_clear_vmcs(list_entry(active_vmcs_list->next, + struct vcpu, arch.hvm_vmx.active_list)); + + if ( read_cr4() & X86_CR4_VMXE ) + { + __vmxoff(); + clear_in_cr4(X86_CR4_VMXE); + } + + local_irq_restore(flags); +} + +void vmx_resume_cpu(void) +{ + if ( !(read_cr4() & X86_CR4_VMXE) ) + { + set_in_cr4(X86_CR4_VMXE); + if ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) ) + BUG(); + } } void vmx_vmcs_enter(struct vcpu *v) @@
-247,12 +296,17 @@ void vmx_vmcs_exit(struct vcpu *v) struct vmcs_struct *vmx_alloc_host_vmcs(void) { - return vmx_alloc_vmcs(); + ASSERT(this_cpu(host_vmcs) == NULL); + this_cpu(host_vmcs) = vmx_alloc_vmcs(); + INIT_LIST_HEAD(&this_cpu(active_vmcs_list)); + return this_cpu(host_vmcs); } void vmx_free_host_vmcs(struct vmcs_struct *vmcs) { + ASSERT(vmcs == this_cpu(host_vmcs)); vmx_free_vmcs(vmcs); + this_cpu(host_vmcs) = NULL; } struct xgt_desc { @@ -451,12 +505,17 @@ static void construct_vmcs(struct vcpu *v) int vmx_create_vmcs(struct vcpu *v) { - if ( v->arch.hvm_vmx.vmcs == NULL ) + struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx; + + if ( arch_vmx->vmcs == NULL ) { - if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL ) + if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL ) return -ENOMEM; - __vmx_clear_vmcs(v); + INIT_LIST_HEAD(&arch_vmx->active_list); + __vmpclear(virt_to_maddr(arch_vmx->vmcs)); + arch_vmx->active_cpu = -1; + arch_vmx->launched = 0; } construct_vmcs(v); diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index dbe9577103..5cb11863e4 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -907,15 +907,6 @@ static void vmx_ctxt_switch_to(struct vcpu *v) vmx_restore_dr(v); } -static void stop_vmx(void) -{ - if ( !(read_cr4() & X86_CR4_VMXE) ) - return; - - __vmxoff(); - clear_in_cr4(X86_CR4_VMXE); -} - static void vmx_store_cpu_guest_regs( struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs) { @@ -1244,7 +1235,6 @@ static void disable_intercept_for_msr(u32 msr) static struct hvm_function_table vmx_function_table = { .name = "VMX", - .disable = stop_vmx, .domain_initialise = vmx_domain_initialise, .domain_destroy = vmx_domain_destroy, .vcpu_initialise = vmx_vcpu_initialise, @@ -1271,7 +1261,9 @@ static struct hvm_function_table vmx_function_table = { .inject_exception = vmx_inject_exception, .init_ap_context = vmx_init_ap_context, .init_hypercall_page = vmx_init_hypercall_page, - 
.event_injection_faulted = vmx_event_injection_faulted + .event_injection_faulted = vmx_event_injection_faulted, + .suspend_cpu = vmx_suspend_cpu, + .resume_cpu = vmx_resume_cpu, }; int start_vmx(void) diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index 12cfcefdb0..10db897ef3 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -71,11 +71,6 @@ enum hvm_intack { struct hvm_function_table { char *name; - /* - * Disable HVM functionality - */ - void (*disable)(void); - /* * Initialise/destroy HVM domain/vcpu resources */ @@ -160,6 +155,9 @@ struct hvm_function_table { void (*init_hypercall_page)(struct domain *d, void *hypercall_page); int (*event_injection_faulted)(struct vcpu *v); + + void (*suspend_cpu)(void); + void (*resume_cpu)(void); }; extern struct hvm_function_table hvm_funcs; @@ -316,4 +314,16 @@ static inline int hvm_event_injection_faulted(struct vcpu *v) /* These exceptions must always be intercepted. */ #define HVM_TRAP_MASK (1U << TRAP_machine_check) +static inline void hvm_suspend_cpu(void) +{ + if ( hvm_funcs.suspend_cpu ) + hvm_funcs.suspend_cpu(); +} + +static inline void hvm_resume_cpu(void) +{ + if ( hvm_funcs.resume_cpu ) + hvm_funcs.resume_cpu(); +} + #endif /* __ASM_X86_HVM_HVM_H__ */ diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index 40a784458f..fbd12a248a 100644 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -28,6 +28,8 @@ extern int start_vmx(void); extern void vmcs_dump_vcpu(void); extern void vmx_init_vmcs_config(void); extern void setup_vmcs_dump(void); +extern void vmx_suspend_cpu(void); +extern void vmx_resume_cpu(void); struct vmcs_struct { u32 vmcs_revision_id; @@ -59,6 +61,7 @@ struct arch_vmx_struct { * - Activated on a CPU by VMPTRLD. Deactivated by VMCLEAR. * - Launched on active CPU by VMLAUNCH when current VMCS. */ + struct list_head active_list; int active_cpu; int launched; -- 2.30.2